bitkeeper revision 1.22 (3e3a9049ZUW-GlNZmkFtI9Ae2r4zJg)
author bd240@boulderdash.cl.cam.ac.uk <bd240@boulderdash.cl.cam.ac.uk>
Fri, 31 Jan 2003 15:03:37 +0000 (15:03 +0000)
committer bd240@boulderdash.cl.cam.ac.uk <bd240@boulderdash.cl.cam.ac.uk>
Fri, 31 Jan 2003 15:03:37 +0000 (15:03 +0000)
Finally, a full, working, and tested xeno with userspace domain building... Go and boot whatever you like :)

xen-2.4.16/common/dom0_ops.c
xen-2.4.16/common/domain.c
xen-2.4.16/common/memory.c
xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_core.c
xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c

index 31d0b992a82d4f1efd6bf23951d36b7c5120ed89..ddfb454e8b4be244cb453c34e02437f472e0d119 100644 (file)
@@ -41,16 +41,16 @@ static void build_page_list(struct task_struct *p)
     curr = p->pg_head;
     *list++ = p->pg_head;
     page = (frame_table + p->pg_head)->next;
-    printk(KERN_ALERT "bd240 debug: list %lx, page num %lx\n", list, page);
     while(page != p->pg_head){
         if(!((unsigned long)list & (PAGE_SIZE-1))){
-            printk(KERN_ALERT "bd240 debug: list %lx, page num %lx\n", list, page);
             curr = (frame_table + curr)->next;
+            unmap_domain_mem((unsigned long)(list-1) & PAGE_MASK);
             list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
         }
         *list++ = page;
         page = (frame_table + page)->next;
     }
+    unmap_domain_mem((unsigned long)(list-1) & PAGE_MASK);
 }
     
 long do_dom0_op(dom0_op_t *u_dom0_op)
@@ -99,17 +99,18 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
          * needs to be allocated
          */
         if(dom != 0){
+
             if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
                 ret = -1;
                 break;
             }
             build_page_list(p);
+            
             ret = p->domain;
 
             op.u.newdomain.domain = ret;
             op.u.newdomain.pg_head = p->pg_head;
             copy_to_user(u_dom0_op, &op, sizeof(op));
-            printk(KERN_ALERT "bd240 debug: hyp dom0_ops: %lx, %d\n", op.u.newdomain.pg_head, op.u.newdomain.memory_kb);
 
             break;
         }
index 8fd666ae5c03e703149bb4d1f991dd375fc5ac9b..96088948329ffc6e22b320b3c1792c024be6040e 100644 (file)
@@ -361,6 +361,7 @@ unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
     temp = temp->next;
     list_del(&pf->list);
     pf->next = pf->prev = p->pg_head = (pf - frame_table);
+    pf->type_count = pf->tot_count = 0;
     free_pfns--;
     pf_head = pf;
 
@@ -375,6 +376,7 @@ unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
         pf->prev = pf_head->prev;
         (frame_table + pf_head->prev)->next = (pf - frame_table);
         pf_head->prev = (pf - frame_table);
+        pf->type_count = pf->tot_count = 0;
 
         free_pfns--;
     }
@@ -409,7 +411,7 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
     start_info_t * virt_startinfo_addr;
     unsigned long virt_stack_addr;
     unsigned long long time;
-    unsigned long phys_l1tab, phys_l2tab;
+    unsigned long phys_l2tab;
     page_update_request_t * pgt_updates;
     unsigned long curr_update_phys;
     unsigned long count;
@@ -432,6 +434,7 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
             pgt_updates = (page_update_request_t *)map_domain_mem(curr_update_phys);
         }
     }
+    unmap_domain_mem((void *)((unsigned long)(pgt_updates-1) & PAGE_MASK));
 
     /* entries 0xe0000000 onwards in page table must contain hypervisor
      * mem mappings - set them up.
@@ -452,12 +455,11 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
     phys_l2tab = pagetable_val(p->mm.pagetable); 
     l2tab = map_domain_mem(phys_l2tab);
     l2tab += l2_table_offset(meminfo->virt_shinfo_addr);
-    phys_l1tab = l2_pgentry_to_phys(*l2tab) + 
-        (l1_table_offset(meminfo->virt_shinfo_addr) * sizeof(l1_pgentry_t));
-    l1tab = map_domain_mem(phys_l1tab);
+    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+    l1tab += l1_table_offset(meminfo->virt_shinfo_addr);
     *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
-    unmap_domain_mem(l2tab);
-    unmap_domain_mem(l1tab);
+    unmap_domain_mem((void *)((unsigned long)l2tab & PAGE_MASK));
+    unmap_domain_mem((void *)((unsigned long)l1tab & PAGE_MASK));
 
     /* set up the shared info structure */
     rdtscll(time);
@@ -535,8 +537,7 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
     __asm__ __volatile__ (
         "mov %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable)));    
     __sti();
-
-
+    
     new_thread(p, 
                (unsigned long)meminfo->virt_load_addr, 
                (unsigned long)virt_stack_addr, 
@@ -570,7 +571,7 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
     unsigned long alloc_index;
     unsigned long ft_pages;
     l2_pgentry_t *l2tab, *l2start;
-    l1_pgentry_t *l1tab = NULL;
+    l1_pgentry_t *l1tab = NULL, *l1start = NULL;
     struct pfn_info *page = NULL;
     net_ring_t *net_ring;
     net_vif_t *net_vif;
@@ -627,53 +628,74 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
      * and frame table struct.
      */
 
-    ft_pages = (frame_table_size + (PAGE_SIZE - 1)) << PAGE_SHIFT;
+    ft_pages = frame_table_size >> PAGE_SHIFT;
     l2tab += l2_table_offset(virt_load_address);
     cur_address = p->pg_head << PAGE_SHIFT;
     for ( count  = 0;
-          count < p->tot_pages + 1;
+          count < p->tot_pages + 1 + ft_pages;
           count++)
     {
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
-            if ( l1tab != NULL ) unmap_domain_mem(l1tab-1);
+            if ( l1tab != NULL ) unmap_domain_mem(l1start);
             phys_l1tab = alloc_page_from_domain(&alloc_address, &alloc_index);
             *l2tab++ = mk_l2_pgentry(phys_l1tab|L2_PROT);
-            l1tab = map_domain_mem(phys_l1tab);
+            l1start = l1tab = map_domain_mem(phys_l1tab);
             clear_page(l1tab);
             l1tab += l1_table_offset(
                 virt_load_address + (count << PAGE_SHIFT));
         }
+        *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
         
-        if( count < alloc_index )
+        if(count < p->tot_pages)
         {
-            *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
             page = frame_table + (cur_address >> PAGE_SHIFT);
             page->flags = dom | PGT_writeable_page;
             page->type_count = page->tot_count = 1;
-        } 
-        else 
-        {
-            *l1tab++ = mk_l1_pgentry((cur_address|L1_PROT) & ~_PAGE_RW);
-            page = frame_table + (cur_address >> PAGE_SHIFT);
-            page->flags = dom | PGT_l1_page_table;
-            page->type_count = 1;
-            page->tot_count = 2; 
         }
 
         cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
     }
-    unmap_domain_mem(l1tab-1);
-    page = frame_table + (frame_table + p->pg_head)->prev; 
+    unmap_domain_mem(l1start);
+
+    /* pages that are part of page tables must be read only */
+    cur_address = p->pg_head << PAGE_SHIFT;
+    for(count = 0;
+        count < alloc_index;
+        count++){
+        cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
+    }
+
+    l2tab = l2start + l2_table_offset(virt_load_address + 
+        (alloc_index << PAGE_SHIFT));
+    l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+    l1tab += l1_table_offset(virt_load_address + (alloc_index << PAGE_SHIFT));
+    l2tab++;
+    for(count = alloc_index;
+        count < p->tot_pages;
+        count++){
+        *l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
+        if(!((unsigned long)l1tab & (PAGE_SIZE - 1))){
+            unmap_domain_mem(l1start);
+            l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+            l2tab++;
+        }
+        page = frame_table + (cur_address >> PAGE_SHIFT);
+        page->flags = dom | PGT_l1_page_table;
+        page->tot_count++;
+        
+        cur_address = ((frame_table + (cur_address >> PAGE_SHIFT))->next) << PAGE_SHIFT;
+    }
     page->flags = dom | PGT_l2_page_table;
+    unmap_domain_mem(l1start);
 
     /* Map in the the shared info structure. */
     virt_shinfo_address = virt_load_address + (p->tot_pages << PAGE_SHIFT); 
     l2tab = l2start + l2_table_offset(virt_shinfo_address);
-    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+    l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
     l1tab += l1_table_offset(virt_shinfo_address);
     *l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
-    unmap_domain_mem(l1tab);
+    unmap_domain_mem(l1start);
 
     /* Set up shared info area. */
     rdtscll(time);
@@ -693,10 +715,10 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
         cur_address < virt_ftable_end;
         cur_address += PAGE_SIZE){
         l2tab = l2start + l2_table_offset(cur_address);
-        l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+        l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
         l1tab += l1_table_offset(cur_address);
         *l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
-        unmap_domain_mem(l1tab);
+        unmap_domain_mem(l1start);
         ft_mapping += PAGE_SIZE;
     }
     
@@ -791,7 +813,6 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
     return 0;
 }
 
-
 void __init domain_init(void)
 {
     int i;
@@ -803,16 +824,3 @@ void __init domain_init(void)
         schedule_data[i].curr = &idle0_task;
     }
 }
-
-
-
-#if 0
-    unsigned long s = (mod[        0].mod_start + (PAGE_SIZE-1)) & PAGE_MASK;
-    unsigned long e = (mod[nr_mods-1].mod_end   + (PAGE_SIZE-1)) & PAGE_MASK;
-    while ( s != e ) 
-    { 
-        free_pages((unsigned long)__va(s), 0); 
-        s += PAGE_SIZE;
-    }
-#endif
-
index 4aa9c085d697b8e2b426e7f90dc3ba81ac006fa6..ed913e5752a703276250e7d7948ddccb7501e612 100644 (file)
@@ -269,6 +269,7 @@ static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
                     flags & PG_type_mask, type, page_type_count(page));
             return -1;
         }
+
         page->flags |= type;
     }
 
@@ -332,7 +333,7 @@ static int get_l2_table(unsigned long page_nr)
 {
     l2_pgentry_t *p_l2_entry, l2_entry;
     int i, ret=0;
-    
+   
     ret = inc_page_refcnt(page_nr, PGT_l2_page_table);
     if ( ret != 0 ) return (ret < 0) ? ret : 0;
     
@@ -354,7 +355,7 @@ static int get_l2_table(unsigned long page_nr)
         if ( ret ) ret = get_twisted_l2_table(page_nr, l2_entry);
         if ( ret ) goto out;
     }
-
+    
     /* Now we simply slap in our high mapping. */
     memcpy(p_l2_entry, 
            idle_pg_table[smp_processor_id()] + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
@@ -570,8 +571,6 @@ static int mod_l1_entry(unsigned long pa, l1_pgentry_t new_l1_entry)
               (_PAGE_GLOBAL|_PAGE_PAT)) ) 
         {
 
-            printk(KERN_ALERT "bd240 debug: bad l1 entry val %lx\n", l1_pgentry_val(new_l1_entry) & (_PAGE_GLOBAL | _PAGE_PAT));
-
             MEM_LOG("Bad L1 entry val %04lx",
                     l1_pgentry_val(new_l1_entry) & 
                     (_PAGE_GLOBAL|_PAGE_PAT));
@@ -592,7 +591,6 @@ static int mod_l1_entry(unsigned long pa, l1_pgentry_t new_l1_entry)
             
             if ( get_page(l1_pgentry_to_pagenr(new_l1_entry),
                           l1_pgentry_val(new_l1_entry) & _PAGE_RW) ){
-                printk(KERN_ALERT "bd240 debug: get_page err\n");
                 goto fail;
             }
         } 
@@ -764,30 +762,6 @@ int do_process_page_updates_bh(page_update_request_t * cur, int count)
 
             break;
 
-            /*
-             * PGREQ_UNCHECKED_UPDATE: Make an unchecked update to a
-             * bottom-level page-table entry.
-             * Restrictions apply:
-             *  1. Update only allowed by domain 0.
-             *  2. Update must be to a level-1 pte belonging to dom0.
-             */
-        case PGREQ_UNCHECKED_UPDATE:
-            cur->ptr &= ~(sizeof(l1_pgentry_t) - 1);
-            page = frame_table + pfn;
-            flags = page->flags;
-            if ( (flags | current->domain) == PGT_l1_page_table )
-            {
-                
-                *(unsigned long *)map_domain_mem(cur->ptr) = cur->val;
-                err = 0;
-            }
-            else
-            {
-                MEM_LOG("UNCHECKED_UPDATE: Bad domain %d, or"
-                        " bad pte type %08lx", current->domain, flags);
-            }
-            break;
-
             /*
              * PGREQ_EXTENDED_COMMAND: Extended command is specified
              * in the least-siginificant bits of the 'value' field.
@@ -804,7 +778,6 @@ int do_process_page_updates_bh(page_update_request_t * cur, int count)
 
         if ( err )
         {
-            page = frame_table + (cur->ptr >> PAGE_SHIFT);
             kill_domain_with_errmsg("Illegal page update request");
         }
 
index d9c33da28370910b47fbabbd8d914f1de4cab878..e4bb8260976cf52ffd40215cb2f80f32de8e7dc3 100644 (file)
@@ -1,3 +1,4 @@
+
 /******************************************************************************
  * dom0_core.c
  * 
@@ -40,6 +41,8 @@
 #define DOM_DIR         "dom"
 #define DOM_MEM         "mem"
 
+#define MAP_DISCONT     1
+
 frame_table_t * frame_table;
 
 static struct proc_dir_entry *xeno_base;
@@ -65,7 +68,6 @@ static int cmd_read_proc(char *page, char **start, off_t off,
 static void create_proc_dom_entries(int dom)
 {
     struct proc_dir_entry * dir;
-    struct proc_dir_entry * file;
     dom_procdata_t * dom_data;
     char dir_name[MAX_LEN];
 
@@ -104,12 +106,8 @@ static ssize_t dom_mem_read(struct file * file, char * buff, size_t size, loff_t
 
     /* remap the range using xen specific routines */
 
-    printk(KERN_ALERT "bd240 debug: dmw entered %lx, %lx\n", mem_data->pfn, mem_data->tot_pages);
-
-    addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, 0, 0);
+    addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, MAP_DISCONT, mem_data->tot_pages);
     
-    printk(KERN_ALERT "bd240 debug: dmw exit %lx, %lx\n", mem_data->pfn, mem_data->tot_pages);
-
     copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
 
     return sizeof(addr);
@@ -215,8 +213,6 @@ static int cmd_write_proc(struct file *file, const char *buffer,
             params->num_vifs = op.u.newdomain.num_vifs;
             params->domain = op.u.newdomain.domain;
 
-            printk(KERN_ALERT "bd240 debug: cmd_write: %lx, %d, %d\n", params->pg_head, params->memory_kb, params->domain); 
-
             /* now notify user space of the new domain's id */
             new_dom_id = create_proc_entry(DOM0_NEWDOM, 0600, xeno_base);
             if ( new_dom_id != NULL )
index 46b44deef3c36d8732e11df27816ba9d9b5838da..19a0ce7667c54ea5f5dc7a4ef9a7bf9111725505 100644 (file)
@@ -27,9 +27,9 @@ extern struct list_head * find_direct(struct list_head *, unsigned long);
  * management applications such as domain builder etc.
  */
 
-#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
+#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
 
-#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr) | PGREQ_UNCHECKED_UPDATE, 0)
+#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr), 0)
 
 #define __direct_pte(x) ((pte_t) { (x) } )
 #define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
@@ -55,6 +55,7 @@ static inline void direct_remappte_range(pte_t * pte, unsigned long address, uns
        do {
                pte_t oldpage;
                oldpage = ptep_get_and_clear(pte);
+
                direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
 
                forget_pte(oldpage);
@@ -122,18 +123,19 @@ int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigne
  * used for remapping discontiguous bits of domain's memory, pages to map are
  * found from frame table beginning at the given first_pg index
  */ 
-int direct_remap_disc_page_range(unsigned long from, unsigned long first_pg,
-                int tot_pages, pgprot_t prot)
+int direct_remap_disc_page_range(unsigned long from, 
+                unsigned long first_pg, int tot_pages, pgprot_t prot)
 {
     frame_table_t * current_ft;
     unsigned long current_pfn;
     unsigned long start = from;
     int count = 0;
 
-    current_ft = (frame_table_t *)(frame_table + first_pg);
+    current_ft = frame_table + first_pg;
     current_pfn = first_pg; 
     while(count < tot_pages){
-            if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT, PAGE_SIZE, prot))
+            if(direct_remap_page_range(start, current_pfn << PAGE_SHIFT, 
+                PAGE_SIZE, prot))
                 goto out;
             start += PAGE_SIZE;
             current_pfn = current_ft->next;
@@ -187,10 +189,9 @@ unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
 
     /* and perform the mapping */
     if(flag == MAP_DISCONT){
-        printk(KERN_ALERT "bd240 debug: call direct_remap_disc_page_range\n");
-        ret = direct_remap_disc_page_range(addr, phys_addr, tot_pages, prot);
+        ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT, 
+            tot_pages, prot);
     } else {
-        printk(KERN_ALERT "bd240 debug: call direct_remap_page_range\n");
         ret = direct_remap_page_range(addr, phys_addr, size, prot);
     }